{
case PGT_l1_page_table:
free_l1_table(page);
- if ( unlikely(current->mm.shadow_mode) &&
- (get_shadow_status(&current->mm,
- page-frame_table) & PSH_shadowed) )
- {
- /* using 'current->mm' is safe because page type changes only
- occur within the context of the currently running domain as
- pagetable pages can not be shared across domains. The one
- exception is when destroying a domain. However, we get away
- with this as there's no way the current domain can have this
- mfn shadowed, so we won't get here... Phew! */
-
- unshadow_table( page-frame_table, type );
- put_shadow_status(&current->mm);
+ if ( unlikely(current->mm.shadow_mode) &&
+ (get_shadow_status(&current->mm,
+ page-frame_table) & PSH_shadowed) )
+ {
+ /* using 'current->mm' is safe because page type changes only
+ occur within the context of the currently running domain as
+ pagetable pages can not be shared across domains. The one
+ exception is when destroying a domain. However, we get away
+ with this as there's no way the current domain can have this
+ mfn shadowed, so we won't get here... Phew! */
+
+ unshadow_table( page-frame_table, type );
+ put_shadow_status(&current->mm);
}
- return;
+ return;
case PGT_l2_page_table:
free_l2_table(page);
- if ( unlikely(current->mm.shadow_mode) &&
- (get_shadow_status(&current->mm,
- page-frame_table) & PSH_shadowed) )
- {
- unshadow_table( page-frame_table, type );
- put_shadow_status(&current->mm);
+ if ( unlikely(current->mm.shadow_mode) &&
+ (get_shadow_status(&current->mm,
+ page-frame_table) & PSH_shadowed) )
+ {
+ unshadow_table( page-frame_table, type );
+ put_shadow_status(&current->mm);
}
- return;
+ return;
default:
BUG();
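
A minimal sketch (not part of the patch) of the pattern both page-table cases above rely on: check whether the page-table page is shadowed, unshadow it, then drop the status reference. The helper name is hypothetical; the get_shadow_status/put_shadow_status/unshadow_table calls and the frame_table arithmetic are exactly as used above, while the struct pfn_info element type and parameter types are assumptions.

static inline void unshadow_if_shadowed(struct pfn_info *page, unsigned int type)
{
    struct mm_struct *m = &current->mm;

    /* Using current->mm is safe here for the reason given in the comment above. */
    if ( unlikely(m->shadow_mode) &&
         (get_shadow_status(m, page - frame_table) & PSH_shadowed) )
    {
        unshadow_table(page - frame_table, type);
        put_shadow_status(m);
    }
}
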
case MMUEXT_PIN_L2_TABLE:
okay = get_page_and_type_from_pagenr(
pfn, (cmd == MMUEXT_PIN_L2_TABLE) ? PGT_l2_page_table :
- PGT_l1_page_table,
+ PGT_l1_page_table,
CHECK_STRICT);
if ( unlikely(!okay) )
{
shadow_mk_pagetable(&current->mm);
- write_ptbase(&current->mm);
+ write_ptbase(&current->mm);
put_page_and_type(&frame_table[old_base_pfn]);
}
okay = mod_l1_entry((l1_pgentry_t *)va,
mk_l1_pgentry(req.val));
- if ( okay && unlikely(current->mm.shadow_mode) &&
- (get_shadow_status(&current->mm, page-frame_table) &
- PSH_shadowed) )
- {
- shadow_l1_normal_pt_update( req.ptr, req.val,
- &prev_spfn, &prev_spl1e );
- put_shadow_status(&current->mm);
- }
+ if ( okay && unlikely(current->mm.shadow_mode) &&
+ (get_shadow_status(&current->mm, page-frame_table) &
+ PSH_shadowed) )
+ {
+ shadow_l1_normal_pt_update( req.ptr, req.val,
+ &prev_spfn, &prev_spl1e );
+ put_shadow_status(&current->mm);
+ }
put_page_type(page);
}
mk_l2_pgentry(req.val),
pfn);
- if ( okay && unlikely(current->mm.shadow_mode) &&
- (get_shadow_status(&current->mm, page-frame_table) &
- PSH_shadowed) )
- {
- shadow_l2_normal_pt_update( req.ptr, req.val );
- put_shadow_status(&current->mm);
- }
+ if ( okay && unlikely(current->mm.shadow_mode) &&
+ (get_shadow_status(&current->mm, page-frame_table) &
+ PSH_shadowed) )
+ {
+ shadow_l2_normal_pt_update( req.ptr, req.val );
+ put_shadow_status(&current->mm);
+ }
put_page_type(page);
}
*(unsigned long *)va = req.val;
okay = 1;
put_page_type(page);
-
- // at present, we don't shadowing such pages
}
break;
}
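
The L1 and L2 update paths above share the same shape: on a successful guest update, check whether the target page-table page is shadowed and, if so, replay the write into the shadow before dropping the status reference. A minimal sketch of the L2 flavour, assuming the symbols used in the hunks above (the helper name itself is hypothetical):

static inline void shadow_sync_l2_update(struct pfn_info *page,
                                         unsigned long ptr, unsigned long val)
{
    struct mm_struct *m = &current->mm;

    if ( unlikely(m->shadow_mode) &&
         (get_shadow_status(m, page - frame_table) & PSH_shadowed) )
    {
        shadow_l2_normal_pt_update(ptr, val);  /* mirror the write into the shadow */
        put_shadow_status(m);
    }
}
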
if ( unlikely(page_nr >= (HYPERVISOR_VIRT_START >> PAGE_SHIFT)) )
return -EINVAL;
- // XXX when we make this support 4MB pages we should also
- // deal with the case of updating L2s
+ /*
+ * XXX When we make this support 4MB superpages we should also deal with
+ * the case of updating L2 entries.
+ */
if ( unlikely(!mod_l1_entry(&linear_pg_table[page_nr],
mk_l1_pgentry(val))) )
{
unsigned long sval;
- l1pte_no_fault( &current->mm, &val, &sval );
+ l1pte_no_fault( &current->mm, &val, &sval );
- if ( unlikely(__put_user( sval, ((unsigned long *) (&shadow_linear_pg_table[page_nr])) ) ) )
- {
- // Since L2's are guranteed RW, failure indicates the page
- // was not shadowed, so ignore.
+ if ( unlikely(__put_user(sval, ((unsigned long *)(
+ &shadow_linear_pg_table[page_nr])))) )
+ {
+ /*
+ * Since L2s are guaranteed RW, failure indicates the page was not
+ * shadowed, so ignore.
+ */
perfc_incrc(shadow_update_va_fail);
- //MEM_LOG("update_va_map: couldn't write update\n");
- }
+ }
- check_pagetable( p, p->mm.pagetable, "va" ); // debug
-
+ check_pagetable( p, p->mm.pagetable, "va" ); /* debug */
}
-
deferred_ops = percpu_info[cpu].deferred_ops;
percpu_info[cpu].deferred_ops = 0;
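
For reference, the indexing used in this path: as the bounds check against HYPERVISOR_VIRT_START suggests, page_nr is the guest virtual address shifted right by PAGE_SHIFT, and both linear_pg_table and shadow_linear_pg_table are indexed by that value. A worked example (illustrative only; the virtual address is made up):

unsigned long va      = 0x08048000UL;             /* example guest VA, below     */
                                                  /* HYPERVISOR_VIRT_START       */
unsigned long page_nr = va >> PAGE_SHIFT;         /* index used above            */
l1_pgentry_t *guest_pte = &linear_pg_table[page_nr];          /* guest L1 entry  */
unsigned long *shadow_pte =
    (unsigned long *)&shadow_linear_pg_table[page_nr];        /* shadow L1 slot  */
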
/* check ref count for leaf pages */
if ( ((frame_table[i].type_and_flags & PGT_type_mask) ==
- PGT_writeable_page) )
+ PGT_writeable_page) )
{
ref_count = 0;
struct task_struct *p = current;
if ( SCHED_OP(alloc_task, p) < 0)
- panic("Failed to allocate scheduler private data for idle task");
+ panic("Failed to allocate scheduler private data for idle task");
SCHED_OP(add_task, p);
spin_lock_irqsave(&schedule_lock[p->processor], flags);
}
-/* sched_pause_sync - synchronously pause a domain's execution
-
-XXXX This is horibly broken -- here just as a place holder at present,
- do not use.
-
-*/
-
+/*
+ * sched_pause_sync - synchronously pause a domain's execution
+ * XXXX This is horribly broken -- here just as a place holder at present,
+ * do not use.
+ */
void sched_pause_sync(struct task_struct *p)
{
unsigned long flags;
spin_lock_irqsave(&schedule_lock[cpu], flags);
+ /* If not the current task, we can remove it from scheduling now. */
if ( schedule_data[cpu].curr != p )
- /* if not the current task, we can remove it from scheduling now */
SCHED_OP(pause, p);
p->state = TASK_PAUSED;
spin_unlock_irqrestore(&schedule_lock[cpu], flags);
- /* spin until domain is descheduled by its local scheduler */
+ /* Spin until domain is descheduled by its local scheduler. */
while ( schedule_data[cpu].curr == p )
{
- send_hyp_event(p, _HYP_EVENT_NEED_RESCHED );
- do_yield();
+ send_hyp_event(p, _HYP_EVENT_NEED_RESCHED );
+ do_yield();
}
-
-
+
/* The domain will not be scheduled again until we do a wake_up(). */
}
spinlock_t shadow_lock;
struct shadow_status *shadow_ht;
struct shadow_status *shadow_ht_free;
- struct shadow_status *shadow_ht_extras; // extra allocation units
+ struct shadow_status *shadow_ht_extras; /* extra allocation units */
unsigned int *shadow_dirty_bitmap;
- unsigned int shadow_dirty_bitmap_size; // in pages, bit per page
+ unsigned int shadow_dirty_bitmap_size; /* in pages, bit per page */
unsigned int shadow_page_count;
unsigned int shadow_max_page_count;
unsigned int shadow_extras_count;
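
The dirty-bitmap fields above keep one bit per guest page frame, with shadow_dirty_bitmap_size counting the pages (and therefore bits) covered. An illustrative mark/test pair under that assumption; the real helpers live in the shadow-mode code and are not shown in this hunk:

/* Illustrative only: one bit per pfn in m->shadow_dirty_bitmap. */
static inline void example_mark_dirty(struct mm_struct *m, unsigned long pfn)
{
    if ( (m->shadow_dirty_bitmap != NULL) && (pfn < m->shadow_dirty_bitmap_size) )
        m->shadow_dirty_bitmap[pfn / (8 * sizeof(unsigned int))] |=
            1u << (pfn & (8 * sizeof(unsigned int) - 1));
}

static inline int example_test_dirty(struct mm_struct *m, unsigned long pfn)
{
    if ( (m->shadow_dirty_bitmap == NULL) || (pfn >= m->shadow_dirty_bitmap_size) )
        return 0;
    return !!(m->shadow_dirty_bitmap[pfn / (8 * sizeof(unsigned int))] &
              (1u << (pfn & (8 * sizeof(unsigned int) - 1))));
}
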
static inline void write_ptbase( struct mm_struct *m )
{
-/* printk("write_ptbase mode=%08x pt=%08lx st=%08lx\n",
- m->shadow_mode, pagetable_val(m->pagetable),
- pagetable_val(m->shadow_table) );
- */
- if( m->shadow_mode )
- {
- //check_pagetable( m, m->pagetable, "write_ptbase" );
- write_cr3_counted(pagetable_val(m->shadow_table));
- }
+ if ( unlikely(m->shadow_mode) )
+ write_cr3_counted(pagetable_val(m->shadow_table));
else
- write_cr3_counted(pagetable_val(m->pagetable));
+ write_cr3_counted(pagetable_val(m->pagetable));
}
-
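
Usage as already seen earlier in this patch (the CR3-switch error path): rebuild the shadow pagetable if shadowing is active, then let write_ptbase() load whichever base applies. Illustrative recap only, no new interfaces:

shadow_mk_pagetable(&current->mm);  /* refresh shadow_table when shadow_mode is set */
write_ptbase(&current->mm);         /* loads shadow_table, else the guest pagetable */
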
#define IDLE0_MM \
{ \
perdomain_pt: 0, \